Q5: Open Question

There are 8 models in this notebook. Model 5 is the best one.

In [1]:
# Auto-reload edited local modules and render matplotlib figures inline.
%load_ext autoreload
%autoreload 2
%matplotlib inline
In [2]:
from keras import layers
from keras import models
from keras import optimizers
from keras.applications import VGG16
from keras.regularizers import l1, l2, l1_l2
Using TensorFlow backend.
In [3]:
import json
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np

from keras.preprocessing.image import ImageDataGenerator
In [14]:
import pickle
import seaborn as sns
import pandas as pd
import glob
import matplotlib.image as mpimg
In [5]:
# Dataset location with the standard train/val/test split.
# NOTE(review): absolute cluster path -- consider a configurable DATA_DIR
# so the notebook is portable to other machines.
root = '/userhome/34/ljiang/deep_learning/A2/Datasets/cat_dog_car_bike/'
train_dir = os.path.join(root, 'train')
val_dir = os.path.join(root, 'val')
test_dir = os.path.join(root, 'test')
In [77]:
def plot_history(history, fig_size=(10, 10)):
    """Plot training/validation accuracy and loss curves over epochs.

    Parameters
    ----------
    history : keras History object or dict
        Either the object returned by ``model.fit*`` (its ``.history``
        attribute is read) or the bare history dict itself, containing the
        keys 'acc', 'val_acc', 'loss' and 'val_loss'.
    fig_size : tuple, optional
        Figure size in inches (default ``(10, 10)``, as before).
    """
    # Accept both a History object and a plain dict so histories reloaded
    # from pickle files (see plot_pickle) can be plotted directly as well.
    metrics = history.history if hasattr(history, 'history') else history
    acc = metrics['acc']
    val_acc = metrics['val_acc']
    loss = metrics['loss']
    val_loss = metrics['val_loss']
    epochs = range(1, len(acc) + 1)
    # Create a fresh figure each call; the original reused figure number 1,
    # which can accumulate state across calls in non-inline backends.
    plt.figure(figsize=fig_size)
    plt.subplot(211)
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    plt.subplot(212)
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()
In [78]:
# Class-index -> human-readable label, used by plot_error for figure titles.
# NOTE(review): flow_from_directory assigns class indices alphabetically by
# folder name, so this order (cat, dog, car, motorbike) looks inconsistent
# with that convention -- verify against train_generator.class_indices.
MAPPING = {
    0: 'cat',
    1: 'dog',
    2: 'car',
    3: 'motorbike'
}

def save_history(history, index=None, root='/userhome/34/ljiang/deep_learning/A2/ass2/history'):
    """Pickle a training history to ``<root>/history-<index>.p``.

    Parameters
    ----------
    history : object
        The object to pickle (a Keras History or, safer, its ``.history``
        dict).  NOTE(review): pickling a full History object can fail if it
        holds a reference to the model -- confirm, or pass ``.history``.
    index : int, optional
        File number; defaults to the count of existing ``*.p`` pickles in
        ``root`` (i.e. the next free slot when files are numbered from 0).
    root : str, optional
        Directory for the pickles.  New backward-compatible parameter; the
        default preserves the original hardcoded path.
    """
    if index is None:
        # Auto-number: one past the number of histories already saved.
        index = len(glob.glob(os.path.join(root, '*.p')))
    with open(os.path.join(root, 'history-%d.p' % index), 'wb') as f:
        pickle.dump(history, f)

def plot_pickle(index):
    """Load the pickled history number ``index`` and plot its curves."""
    file_path = '/userhome/34/ljiang/deep_learning/A2/ass2/history/history-%d.p' % index
    with open(file_path, 'rb') as fh:
        saved_history = pickle.load(fh)
    plot_history(saved_history)
        
def plot_error(model, data_generator):
    """Display every image in ``data_generator`` that ``model`` misclassifies.

    For each misclassified image, shows the image with a title containing the
    file name, the predicted label (via MAPPING) and the raw prediction vector.

    NOTE(review): this only lines up correctly when the generator was built
    with ``shuffle=False`` (as test_generator is) so that predictions match
    ``.filenames`` / ``.classes`` -- confirm for any new caller.
    """
    y_predict = model.predict_generator(data_generator)
    # Vectorized argmax over the class axis replaces the original per-row
    # list comprehension; the comparison then broadcasts elementwise.
    predicted = np.argmax(y_predict, axis=1)
    for i in np.nonzero(predicted != data_generator.classes)[0]:
        file_name = data_generator.filenames[i]
        file_path = os.path.join(data_generator.directory, file_name)
        plt.imshow(mpimg.imread(file_path))
        plt.title('%s: %s\n(%s)' % (file_name, MAPPING[np.argmax(y_predict[i])], list(y_predict[i])))
        plt.show()
        
def load_model(file_name, root='/userhome/34/ljiang/deep_learning/A2/ass2/'):
    """Load a saved Keras model from ``root``/``file_name``."""
    model_path = os.path.join(root, file_name)
    return models.load_model(model_path)

Model 1

In [164]:
# --- Model 1: fine-tune VGG16 (block5 onward) on augmented 224x224 images ---

# Augmentation is applied to the training data only.
aug_train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=20,
    class_mode='categorical')
# Validation/test data are only rescaled, never augmented.
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(224, 224),
    batch_size=20,
    class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1./255)
# batch_size=1 and shuffle=False keep predictions aligned with
# .filenames / .classes, which plot_error relies on.
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=1,
    class_mode='categorical',
    shuffle=False)

def best_model():
    """VGG16 conv base (ImageNet weights) frozen below block5_conv1, topped
    with Flatten -> Dropout(0.5) -> Dense(512, relu) -> Dense(4, softmax).

    NOTE(review): this function is redefined (with small variations) in the
    cells for Models 2-5; later definitions silently shadow this one.
    """
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Freeze everything before block5_conv1; fine-tune block5 only.
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        layer.trainable = set_trainable
    
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(4, activation='softmax'))

    # 'learning_rate' kwarg requires Keras >= 2.3 (older releases use 'lr').
    model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(learning_rate=1e-4),
              metrics=['acc'])
    return model

model_q5 = best_model()
# steps_per_epoch=100 x batch 20 = 2000 samples per "epoch" vs 1675 train
# images, and validation_steps=50 x 20 = 1000 vs 835 val images -- the
# generators simply loop, so some images repeat within an epoch.
history_q5 = model_q5.fit_generator(
    aug_train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=val_generator,
    validation_steps=50)
Found 1675 images belonging to 4 classes.
Found 835 images belonging to 4 classes.
Found 200 images belonging to 4 classes.
Epoch 1/30
100/100 [==============================] - 139s 1s/step - loss: 0.4330 - acc: 0.8475 - val_loss: 0.0171 - val_acc: 0.9735
Epoch 2/30
100/100 [==============================] - 135s 1s/step - loss: 0.1244 - acc: 0.9560 - val_loss: 0.0930 - val_acc: 0.9790
Epoch 3/30
100/100 [==============================] - 135s 1s/step - loss: 0.0876 - acc: 0.9688 - val_loss: 0.0471 - val_acc: 0.9612
Epoch 4/30
100/100 [==============================] - 128s 1s/step - loss: 0.0704 - acc: 0.9738 - val_loss: 0.1188 - val_acc: 0.9790
Epoch 5/30
100/100 [==============================] - 130s 1s/step - loss: 0.0588 - acc: 0.9795 - val_loss: 0.0894 - val_acc: 0.9788
Epoch 6/30
100/100 [==============================] - 131s 1s/step - loss: 0.0714 - acc: 0.9795 - val_loss: 0.0024 - val_acc: 0.9821
Epoch 7/30
100/100 [==============================] - 125s 1s/step - loss: 0.0490 - acc: 0.9867 - val_loss: 0.0151 - val_acc: 0.9699
Epoch 8/30
100/100 [==============================] - 142s 1s/step - loss: 0.0505 - acc: 0.9848 - val_loss: 0.0954 - val_acc: 0.9741
Epoch 9/30
100/100 [==============================] - 133s 1s/step - loss: 0.0521 - acc: 0.9862 - val_loss: 0.1188 - val_acc: 0.9797
Epoch 10/30
100/100 [==============================] - 136s 1s/step - loss: 0.0622 - acc: 0.9877 - val_loss: 4.4817e-04 - val_acc: 0.9864
Epoch 11/30
100/100 [==============================] - 133s 1s/step - loss: 0.0577 - acc: 0.9877 - val_loss: 0.7998 - val_acc: 0.9212
Epoch 12/30
100/100 [==============================] - 134s 1s/step - loss: 0.0584 - acc: 0.9846 - val_loss: 0.1455 - val_acc: 0.9818
Epoch 13/30
100/100 [==============================] - 132s 1s/step - loss: 0.0493 - acc: 0.9883 - val_loss: 0.0061 - val_acc: 0.9814
Epoch 14/30
100/100 [==============================] - 125s 1s/step - loss: 0.0483 - acc: 0.9902 - val_loss: 0.0010 - val_acc: 0.9807
Epoch 15/30
100/100 [==============================] - 139s 1s/step - loss: 0.0675 - acc: 0.9888 - val_loss: 0.3996 - val_acc: 0.9785
Epoch 16/30
100/100 [==============================] - 129s 1s/step - loss: 0.0621 - acc: 0.9893 - val_loss: 1.1728 - val_acc: 0.9743
Epoch 17/30
100/100 [==============================] - 131s 1s/step - loss: 0.0624 - acc: 0.9888 - val_loss: 0.0838 - val_acc: 0.9901
Epoch 18/30
100/100 [==============================] - 150s 1s/step - loss: 0.0570 - acc: 0.9885 - val_loss: 0.3025 - val_acc: 0.9848
Epoch 19/30
100/100 [==============================] - 141s 1s/step - loss: 0.0646 - acc: 0.9894 - val_loss: 0.1067 - val_acc: 0.9556
Epoch 20/30
100/100 [==============================] - 140s 1s/step - loss: 0.0526 - acc: 0.9904 - val_loss: 0.1919 - val_acc: 0.9783
Epoch 21/30
100/100 [==============================] - 135s 1s/step - loss: 0.0576 - acc: 0.9865 - val_loss: 0.0000e+00 - val_acc: 0.9709
Epoch 22/30
100/100 [==============================] - 146s 1s/step - loss: 0.0508 - acc: 0.9922 - val_loss: 0.1869 - val_acc: 0.9771
Epoch 23/30
100/100 [==============================] - 138s 1s/step - loss: 0.0508 - acc: 0.9918 - val_loss: 8.4982e-05 - val_acc: 0.9848
Epoch 24/30
100/100 [==============================] - 140s 1s/step - loss: 0.0703 - acc: 0.9893 - val_loss: 0.6789 - val_acc: 0.9629
Epoch 25/30
100/100 [==============================] - 146s 1s/step - loss: 0.1033 - acc: 0.9863 - val_loss: 0.0312 - val_acc: 0.9712
Epoch 26/30
100/100 [==============================] - 142s 1s/step - loss: 0.0339 - acc: 0.9929 - val_loss: 0.0877 - val_acc: 0.9838
Epoch 27/30
100/100 [==============================] - 145s 1s/step - loss: 0.0681 - acc: 0.9903 - val_loss: 0.3502 - val_acc: 0.9794
Epoch 28/30
100/100 [==============================] - 143s 1s/step - loss: 0.0523 - acc: 0.9882 - val_loss: 0.0000e+00 - val_acc: 0.9648
Epoch 29/30
100/100 [==============================] - 145s 1s/step - loss: 0.0774 - acc: 0.9905 - val_loss: 0.0702 - val_acc: 0.9864
Epoch 30/30
100/100 [==============================] - 146s 1s/step - loss: 0.0815 - acc: 0.9911 - val_loss: 0.1055 - val_acc: 0.9797
In [165]:
# model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_1')
In [169]:
model_q5.summary()  # architecture and parameter counts for Model 1
Model: "sequential_72"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
vgg16 (Model)                (None, 7, 7, 512)         14714688  
_________________________________________________________________
flatten_61 (Flatten)         (None, 25088)             0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 25088)             0         
_________________________________________________________________
dense_131 (Dense)            (None, 512)               12845568  
_________________________________________________________________
dense_132 (Dense)            (None, 4)                 2052      
=================================================================
Total params: 27,562,308
Trainable params: 19,927,044
Non-trainable params: 7,635,264
_________________________________________________________________
In [166]:
# NOTE(review): the reported test loss of exactly 0.0 (Out below) looks
# wrong given non-perfect accuracy -- verify (Keras 2.3.x had known
# metric-aggregation issues with *_generator methods).
model_q5.evaluate_generator(test_generator)
Out[166]:
[0.0, 0.9649999737739563]
In [168]:
# NOTE(review): `hs` is never initialized in any visible cell -- this relies
# on hidden kernel state and raises NameError on Restart & Run All.
plot_history(hs[0]['history'])
In [71]:
# Reload the saved Model 1 and display every test image it misclassifies.
model = load_model('q5_model_1')
plot_error(model, test_generator)

Model 2: increase the batch size (20 → 32)

In [172]:
# --- Model 2: identical to Model 1 except batch_size 20 -> 32 ---
# NOTE(review): this cell rebinds aug_train_generator / val_generator /
# test_generator and redefines best_model(), shadowing the Model 1 versions.
aug_train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical')
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1./255)
# batch_size=1 + shuffle=False keeps predictions aligned with filenames.
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=1,
    class_mode='categorical',
    shuffle=False)

def best_model():
    """Same architecture as Model 1: VGG16 frozen below block5_conv1,
    Flatten -> Dropout(0.5) -> Dense(512, relu) -> Dense(4, softmax)."""
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        layer.trainable = set_trainable
    
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(4, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(learning_rate=1e-4),
              metrics=['acc'])
    return model

model_q5 = best_model()
# With batch 32, steps_per_epoch=100 now covers 3200 samples per "epoch"
# (train set has 1675 images, so each is seen ~twice, augmented).
history_q5 = model_q5.fit_generator(
    aug_train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=val_generator,
    validation_steps=50)
Found 1675 images belonging to 4 classes.
Found 835 images belonging to 4 classes.
Found 200 images belonging to 4 classes.
Epoch 1/30
100/100 [==============================] - 107s 1s/step - loss: 0.4985 - acc: 0.8008 - val_loss: 0.0934 - val_acc: 0.8931
Epoch 2/30
100/100 [==============================] - 74s 741ms/step - loss: 0.1621 - acc: 0.9386 - val_loss: 0.0935 - val_acc: 0.9767
Epoch 3/30
100/100 [==============================] - 68s 681ms/step - loss: 0.1607 - acc: 0.9487 - val_loss: 0.1362 - val_acc: 0.9708
Epoch 4/30
100/100 [==============================] - 68s 683ms/step - loss: 0.1031 - acc: 0.9645 - val_loss: 0.0083 - val_acc: 0.9741
Epoch 5/30
100/100 [==============================] - 69s 690ms/step - loss: 0.1065 - acc: 0.9670 - val_loss: 0.0063 - val_acc: 0.9838
Epoch 6/30
100/100 [==============================] - 68s 675ms/step - loss: 0.1058 - acc: 0.9664 - val_loss: 0.5148 - val_acc: 0.9293
Epoch 7/30
100/100 [==============================] - 68s 677ms/step - loss: 0.0893 - acc: 0.9725 - val_loss: 0.1066 - val_acc: 0.9414
Epoch 8/30
100/100 [==============================] - 66s 665ms/step - loss: 0.1113 - acc: 0.9718 - val_loss: 0.0191 - val_acc: 0.9851
Epoch 9/30
100/100 [==============================] - 67s 672ms/step - loss: 0.0769 - acc: 0.9813 - val_loss: 0.1861 - val_acc: 0.9747
Epoch 10/30
100/100 [==============================] - 75s 746ms/step - loss: 0.1002 - acc: 0.9734 - val_loss: 8.5073e-04 - val_acc: 0.9792
Epoch 11/30
100/100 [==============================] - 70s 697ms/step - loss: 0.0881 - acc: 0.9816 - val_loss: 0.0568 - val_acc: 0.9656
Epoch 12/30
100/100 [==============================] - 70s 696ms/step - loss: 0.0815 - acc: 0.9811 - val_loss: 0.0141 - val_acc: 0.9760
Epoch 13/30
100/100 [==============================] - 67s 673ms/step - loss: 0.0981 - acc: 0.9782 - val_loss: 1.1176e-08 - val_acc: 0.9825
Epoch 14/30
100/100 [==============================] - 65s 651ms/step - loss: 0.0989 - acc: 0.9769 - val_loss: 0.1624 - val_acc: 0.9605
Epoch 15/30
100/100 [==============================] - 70s 705ms/step - loss: 0.0735 - acc: 0.9845 - val_loss: 0.0280 - val_acc: 0.9805
Epoch 16/30
100/100 [==============================] - 68s 682ms/step - loss: 0.0965 - acc: 0.9834 - val_loss: 0.0000e+00 - val_acc: 0.9818
Epoch 17/30
100/100 [==============================] - 69s 693ms/step - loss: 0.0987 - acc: 0.9777 - val_loss: 1.8281 - val_acc: 0.9760
Epoch 18/30
100/100 [==============================] - 66s 665ms/step - loss: 0.0867 - acc: 0.9854 - val_loss: 0.0040 - val_acc: 0.9747
Epoch 19/30
100/100 [==============================] - 73s 726ms/step - loss: 0.1122 - acc: 0.9829 - val_loss: 0.0373 - val_acc: 0.9715
Epoch 20/30
100/100 [==============================] - 65s 649ms/step - loss: 0.0981 - acc: 0.9794 - val_loss: 0.0120 - val_acc: 0.9747
Epoch 21/30
100/100 [==============================] - 68s 679ms/step - loss: 0.0635 - acc: 0.9835 - val_loss: 0.8140 - val_acc: 0.9784
Epoch 22/30
100/100 [==============================] - 69s 692ms/step - loss: 0.0770 - acc: 0.9835 - val_loss: 0.4104 - val_acc: 0.9728
Epoch 23/30
100/100 [==============================] - 71s 711ms/step - loss: 0.1049 - acc: 0.9858 - val_loss: 2.8796e-06 - val_acc: 0.9818
Epoch 24/30
100/100 [==============================] - 71s 714ms/step - loss: 0.0728 - acc: 0.9836 - val_loss: 0.1133 - val_acc: 0.9864
Epoch 25/30
100/100 [==============================] - 68s 681ms/step - loss: 0.1506 - acc: 0.9845 - val_loss: 0.4417 - val_acc: 0.9773
Epoch 26/30
100/100 [==============================] - 68s 676ms/step - loss: 0.0913 - acc: 0.9816 - val_loss: 0.0367 - val_acc: 0.9377
Epoch 27/30
100/100 [==============================] - 65s 650ms/step - loss: 0.0809 - acc: 0.9829 - val_loss: 6.8697e-05 - val_acc: 0.9585
Epoch 28/30
100/100 [==============================] - 67s 671ms/step - loss: 0.0940 - acc: 0.9820 - val_loss: 0.0234 - val_acc: 0.9739
Epoch 29/30
100/100 [==============================] - 64s 642ms/step - loss: 0.1727 - acc: 0.9789 - val_loss: 0.0023 - val_acc: 0.9838
Epoch 30/30
100/100 [==============================] - 66s 664ms/step - loss: 0.1474 - acc: 0.9825 - val_loss: 0.0041 - val_acc: 0.9754
In [174]:
# model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_2')
In [176]:
model_q5.summary()  # architecture and parameter counts for Model 2
Model: "sequential_73"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
vgg16 (Model)                (None, 7, 7, 512)         14714688  
_________________________________________________________________
flatten_62 (Flatten)         (None, 25088)             0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 25088)             0         
_________________________________________________________________
dense_133 (Dense)            (None, 512)               12845568  
_________________________________________________________________
dense_134 (Dense)            (None, 4)                 2052      
=================================================================
Total params: 27,562,308
Trainable params: 19,927,044
Non-trainable params: 7,635,264
_________________________________________________________________
In [173]:
# NOTE(review): reported test loss of exactly 0.0 (Out below) is suspicious;
# see the note on the Model 1 evaluation cell.
model_q5.evaluate_generator(test_generator)
Out[173]:
[0.0, 0.9700000286102295]
In [175]:
# NOTE(review): `hs` is never initialized in any visible cell -- hidden
# kernel state; fails on Restart & Run All.
plot_history(hs[1]['history'])
In [69]:
# Reload the saved Model 2 and display every test image it misclassifies.
model = load_model('q5_model_2')
plot_error(model, test_generator)

Model 3: remove the dropout layer

In [177]:
# --- Model 3: same as Model 2 but WITHOUT the Dropout layer ---
aug_train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical')
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1./255)
# batch_size=1 + shuffle=False keeps predictions aligned with filenames.
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=1,
    class_mode='categorical',
    shuffle=False)

def best_model():
    """VGG16 frozen below block5_conv1, then Flatten -> Dense(512, relu)
    -> Dense(4, softmax).  No dropout (the variable under test here)."""
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        layer.trainable = set_trainable
    
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(4, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(learning_rate=1e-4),
              metrics=['acc'])
    return model

model_q5 = best_model()
history_q5 = model_q5.fit_generator(
    aug_train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=val_generator,
    validation_steps=50)
# NOTE(review): `hs` is never initialized anywhere in the visible notebook,
# so this append relies on hidden kernel state (NameError on a fresh run).
hs.append({
    'model': model_q5,
    'history': history_q5,
})
Found 1675 images belonging to 4 classes.
Found 835 images belonging to 4 classes.
Found 200 images belonging to 4 classes.
Epoch 1/30
100/100 [==============================] - 76s 759ms/step - loss: 0.4565 - acc: 0.8238 - val_loss: 0.2007 - val_acc: 0.9325
Epoch 2/30
100/100 [==============================] - 68s 675ms/step - loss: 0.1521 - acc: 0.9433 - val_loss: 0.0121 - val_acc: 0.9747
Epoch 3/30
100/100 [==============================] - 65s 652ms/step - loss: 0.1033 - acc: 0.9648 - val_loss: 0.0091 - val_acc: 0.9715
Epoch 4/30
100/100 [==============================] - 65s 646ms/step - loss: 0.1012 - acc: 0.9704 - val_loss: 0.0357 - val_acc: 0.9825
Epoch 5/30
100/100 [==============================] - 66s 658ms/step - loss: 0.0947 - acc: 0.9718 - val_loss: 0.0516 - val_acc: 0.9734
Epoch 6/30
100/100 [==============================] - 65s 655ms/step - loss: 0.0769 - acc: 0.9777 - val_loss: 0.1949 - val_acc: 0.9715
Epoch 7/30
100/100 [==============================] - 66s 661ms/step - loss: 0.0643 - acc: 0.9816 - val_loss: 0.0166 - val_acc: 0.9796
Epoch 8/30
100/100 [==============================] - 68s 684ms/step - loss: 0.0782 - acc: 0.9790 - val_loss: 5.2154e-07 - val_acc: 0.9851
Epoch 9/30
100/100 [==============================] - 63s 627ms/step - loss: 0.0868 - acc: 0.9827 - val_loss: 4.1821e-04 - val_acc: 0.9741
Epoch 10/30
100/100 [==============================] - 70s 700ms/step - loss: 0.0925 - acc: 0.9820 - val_loss: 6.4591e-06 - val_acc: 0.9689
Epoch 11/30
100/100 [==============================] - 70s 698ms/step - loss: 0.0783 - acc: 0.9827 - val_loss: 0.0113 - val_acc: 0.9844
Epoch 12/30
100/100 [==============================] - 67s 666ms/step - loss: 0.0812 - acc: 0.9799 - val_loss: 0.0017 - val_acc: 0.9630
Epoch 13/30
100/100 [==============================] - 67s 674ms/step - loss: 0.0977 - acc: 0.9785 - val_loss: 0.0038 - val_acc: 0.9676
Epoch 14/30
100/100 [==============================] - 66s 664ms/step - loss: 0.0576 - acc: 0.9827 - val_loss: 6.6866e-04 - val_acc: 0.9758
Epoch 15/30
100/100 [==============================] - 68s 685ms/step - loss: 0.0601 - acc: 0.9851 - val_loss: 0.0017 - val_acc: 0.9825
Epoch 16/30
100/100 [==============================] - 69s 691ms/step - loss: 0.1697 - acc: 0.9816 - val_loss: 0.0448 - val_acc: 0.9695
Epoch 17/30
100/100 [==============================] - 67s 666ms/step - loss: 0.0692 - acc: 0.9829 - val_loss: 2.5704e-07 - val_acc: 0.9792
Epoch 18/30
100/100 [==============================] - 68s 684ms/step - loss: 0.0933 - acc: 0.9820 - val_loss: 0.0029 - val_acc: 0.9728
Epoch 19/30
100/100 [==============================] - 72s 722ms/step - loss: 0.1612 - acc: 0.9826 - val_loss: 0.3983 - val_acc: 0.9702
Epoch 20/30
100/100 [==============================] - 68s 678ms/step - loss: 0.0724 - acc: 0.9839 - val_loss: 0.0020 - val_acc: 0.9721
Epoch 21/30
100/100 [==============================] - 68s 677ms/step - loss: 0.0569 - acc: 0.9892 - val_loss: 0.7835 - val_acc: 0.9523
Epoch 22/30
100/100 [==============================] - 69s 685ms/step - loss: 0.0896 - acc: 0.9829 - val_loss: 1.1572 - val_acc: 0.9715
Epoch 23/30
100/100 [==============================] - 66s 657ms/step - loss: 0.0998 - acc: 0.9840 - val_loss: 0.0037 - val_acc: 0.9767
Epoch 24/30
100/100 [==============================] - 67s 671ms/step - loss: 0.0827 - acc: 0.9857 - val_loss: 0.0706 - val_acc: 0.9818
Epoch 25/30
100/100 [==============================] - 70s 698ms/step - loss: 0.0727 - acc: 0.9833 - val_loss: 0.2753 - val_acc: 0.9747
Epoch 26/30
100/100 [==============================] - 68s 679ms/step - loss: 0.0766 - acc: 0.9854 - val_loss: 0.9335 - val_acc: 0.9695
Epoch 27/30
100/100 [==============================] - 66s 663ms/step - loss: 0.0866 - acc: 0.9839 - val_loss: 0.0000e+00 - val_acc: 0.9831
Epoch 28/30
100/100 [==============================] - 71s 706ms/step - loss: 0.0750 - acc: 0.9884 - val_loss: 2.0535 - val_acc: 0.9796
Epoch 29/30
100/100 [==============================] - 70s 700ms/step - loss: 0.0531 - acc: 0.9857 - val_loss: 0.0000e+00 - val_acc: 0.9702
Epoch 30/30
100/100 [==============================] - 70s 701ms/step - loss: 0.1575 - acc: 0.9864 - val_loss: 1.1466 - val_acc: 0.9643
In [178]:
# model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_3')
In [179]:
model_q5.summary()  # architecture and parameter counts for Model 3
Model: "sequential_74"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
vgg16 (Model)                (None, 7, 7, 512)         14714688  
_________________________________________________________________
flatten_63 (Flatten)         (None, 25088)             0         
_________________________________________________________________
dense_135 (Dense)            (None, 512)               12845568  
_________________________________________________________________
dense_136 (Dense)            (None, 4)                 2052      
=================================================================
Total params: 27,562,308
Trainable params: 19,927,044
Non-trainable params: 7,635,264
_________________________________________________________________
In [180]:
# NOTE(review): reported test loss of exactly 0.0 (Out below) is suspicious;
# see the note on the Model 1 evaluation cell.
model_q5.evaluate_generator(test_generator)
Out[180]:
[0.0, 0.9549999833106995]
In [181]:
plot_history(history_q5)  # learning curves for Model 3
In [70]:
# Reload the saved Model 3 and display every test image it misclassifies.
model = load_model('q5_model_3')
plot_error(model, test_generator)

Model 4: dropout after the ReLU dense layer

(Fine-tuning more layers, starting from block4_conv1, performed very poorly!)

In [84]:
# --- Model 4: larger 256x256 inputs; per the header, dropout after ReLU ---
# NOTE(review): the code below shows Dropout BEFORE the Dense(512, relu)
# layer, but the summary output (In[88]) shows dropout_3 AFTER dense_5 --
# this cell appears to have been edited after it was last run; confirm.
aug_train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
    train_dir,
    target_size=(256, 256),
    batch_size=32,
    class_mode='categorical')
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(256, 256),
    batch_size=32,
    class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1./255)
# batch_size=1 + shuffle=False keeps predictions aligned with filenames.
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(256, 256),
    batch_size=1,
    class_mode='categorical',
    shuffle=False)

def best_model():
    """VGG16 (256x256 input) frozen below block5_conv1, then
    Flatten -> Dropout(0.5) -> Dense(512, relu) -> Dense(4, softmax)."""
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        layer.trainable = set_trainable
    
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(4, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(learning_rate=1e-4),
              metrics=['acc'])
    return model

model_q5 = best_model()
history_q5 = model_q5.fit_generator(
    aug_train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=val_generator,
    validation_steps=50)

# Persist the trained model and its history (history pickle index 4).
model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_4')
save_history(history_q5, 4)
Found 1675 images belonging to 4 classes.
Found 835 images belonging to 4 classes.
Found 200 images belonging to 4 classes.
Epoch 1/30
100/100 [==============================] - 90s 899ms/step - loss: 0.4938 - acc: 0.8030 - val_loss: 0.0588 - val_acc: 0.9714
Epoch 2/30
100/100 [==============================] - 84s 837ms/step - loss: 0.1684 - acc: 0.9414 - val_loss: 0.1493 - val_acc: 0.9637
Epoch 3/30
100/100 [==============================] - 83s 827ms/step - loss: 0.1308 - acc: 0.9572 - val_loss: 0.0455 - val_acc: 0.9663
Epoch 4/30
100/100 [==============================] - 85s 851ms/step - loss: 0.1470 - acc: 0.9544 - val_loss: 0.0110 - val_acc: 0.9734
Epoch 5/30
100/100 [==============================] - 82s 823ms/step - loss: 0.1020 - acc: 0.9683 - val_loss: 0.0222 - val_acc: 0.9844
Epoch 6/30
100/100 [==============================] - 85s 851ms/step - loss: 0.1195 - acc: 0.9709 - val_loss: 0.0208 - val_acc: 0.9754
Epoch 7/30
100/100 [==============================] - 87s 865ms/step - loss: 0.0966 - acc: 0.9715 - val_loss: 0.0596 - val_acc: 0.9637
Epoch 8/30
100/100 [==============================] - 81s 812ms/step - loss: 0.0765 - acc: 0.9756 - val_loss: 0.1267 - val_acc: 0.9617
Epoch 9/30
100/100 [==============================] - 80s 802ms/step - loss: 0.1064 - acc: 0.9744 - val_loss: 0.0023 - val_acc: 0.9598
Epoch 10/30
100/100 [==============================] - 84s 838ms/step - loss: 0.1049 - acc: 0.9744 - val_loss: 0.0862 - val_acc: 0.9702
Epoch 11/30
100/100 [==============================] - 84s 838ms/step - loss: 0.1160 - acc: 0.9763 - val_loss: 0.0000e+00 - val_acc: 0.9851
Epoch 12/30
100/100 [==============================] - 79s 786ms/step - loss: 0.0735 - acc: 0.9820 - val_loss: 1.0915e-06 - val_acc: 0.9844
Epoch 13/30
100/100 [==============================] - 82s 817ms/step - loss: 0.0870 - acc: 0.9770 - val_loss: 1.8626e-08 - val_acc: 0.9792
Epoch 14/30
100/100 [==============================] - 83s 835ms/step - loss: 0.0844 - acc: 0.9782 - val_loss: 4.2017e-05 - val_acc: 0.9854
Epoch 15/30
100/100 [==============================] - 83s 828ms/step - loss: 0.0617 - acc: 0.9790 - val_loss: 0.2406 - val_acc: 0.9682
Epoch 16/30
100/100 [==============================] - 81s 807ms/step - loss: 0.0897 - acc: 0.9849 - val_loss: 0.3017 - val_acc: 0.9780
Epoch 17/30
100/100 [==============================] - 83s 830ms/step - loss: 0.1439 - acc: 0.9810 - val_loss: 0.3154 - val_acc: 0.9572
Epoch 18/30
100/100 [==============================] - 83s 835ms/step - loss: 0.1463 - acc: 0.9756 - val_loss: 1.0216 - val_acc: 0.9339
Epoch 20/30
100/100 [==============================] - 84s 843ms/step - loss: 0.0758 - acc: 0.9839 - val_loss: 2.8096e-04 - val_acc: 0.9780
Epoch 21/30
100/100 [==============================] - 82s 821ms/step - loss: 0.1184 - acc: 0.9842 - val_loss: 1.0108 - val_acc: 0.9777
Epoch 22/30
100/100 [==============================] - 83s 831ms/step - loss: 0.0558 - acc: 0.9867 - val_loss: 0.0199 - val_acc: 0.9792
Epoch 23/30
100/100 [==============================] - 90s 899ms/step - loss: 0.1576 - acc: 0.9835 - val_loss: 7.0139e-06 - val_acc: 0.9780
Epoch 24/30
100/100 [==============================] - 84s 843ms/step - loss: 0.1280 - acc: 0.9786 - val_loss: 0.7738 - val_acc: 0.9728
Epoch 25/30
100/100 [==============================] - 86s 859ms/step - loss: 0.0869 - acc: 0.9861 - val_loss: 0.2550 - val_acc: 0.9747
Epoch 26/30
100/100 [==============================] - 82s 820ms/step - loss: 0.0572 - acc: 0.9867 - val_loss: 0.6033 - val_acc: 0.9786
Epoch 27/30
100/100 [==============================] - 82s 819ms/step - loss: 0.1533 - acc: 0.9791 - val_loss: 0.0000e+00 - val_acc: 0.9708
Epoch 28/30
100/100 [==============================] - 85s 848ms/step - loss: 0.0936 - acc: 0.9855 - val_loss: 1.3411e-07 - val_acc: 0.9720
Epoch 29/30
100/100 [==============================] - 82s 825ms/step - loss: 0.1187 - acc: 0.9828 - val_loss: 3.3592 - val_acc: 0.9650
Epoch 30/30
100/100 [==============================] - 85s 848ms/step - loss: 0.0814 - acc: 0.9865 - val_loss: 1.2554e-06 - val_acc: 0.9676
In [88]:
model_q5.summary()  # architecture and parameter counts for Model 4
Model: "sequential_3"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
vgg16 (Model)                (None, 8, 8, 512)         14714688  
_________________________________________________________________
flatten_3 (Flatten)          (None, 32768)             0         
_________________________________________________________________
dense_5 (Dense)              (None, 512)               16777728  
_________________________________________________________________
dropout_3 (Dropout)          (None, 512)               0         
_________________________________________________________________
dense_6 (Dense)              (None, 4)                 2052      
=================================================================
Total params: 31,494,468
Trainable params: 23,859,204
Non-trainable params: 7,635,264
_________________________________________________________________
In [89]:
# NOTE(review): reported test loss of exactly 0.0 (Out below) is suspicious;
# see the note on the Model 1 evaluation cell.
model_q5.evaluate_generator(test_generator)
Out[89]:
[0.0, 0.9549999833106995]
In [90]:
plot_history(history_q5)  # learning curves for Model 4

Model 5: add a batch-normalization layer and dropout (best model)

In [91]:
aug_train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical')
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=1,
    class_mode='categorical',
    shuffle=False)

def best_model(input_shape=(224, 224, 3), fine_tune_from='block5_conv1'):
    """Build and compile the Q5 transfer-learning classifier (model 5).

    A VGG16 convolutional base with ImageNet weights is used as a
    feature extractor; every layer before ``fine_tune_from`` is frozen
    and the remaining layers are fine-tuned. On top sits a 512-unit
    dense head with dropout and batch normalization, ending in a 4-way
    softmax (cat / dog / car / motorbike).

    Parameters
    ----------
    input_shape : tuple, optional
        Input image shape; the default (224, 224, 3) matches the
        generators defined in this cell, so existing calls are
        unaffected.
    fine_tune_from : str, optional
        Name of the first VGG16 layer to unfreeze for fine-tuning.

    Returns
    -------
    A compiled Keras ``Sequential`` model (categorical crossentropy,
    RMSprop at 1e-4, accuracy metric).
    """
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
    # Freeze every layer up to (but not including) `fine_tune_from`;
    # the flag flips on permanently once that layer is reached.
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == fine_tune_from:
            set_trainable = True
        layer.trainable = set_trainable

    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.5))
    # NOTE(review): BatchNormalization is applied after Dropout here; the
    # more common ordering is Dense -> BN -> Dropout. Kept as-is so the
    # architecture matches the saved/trained model.
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(4, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])
    return model

model_q5 = best_model()
# Fine-tune for 30 epochs: 100 steps/epoch x batch 32 = 3200 augmented
# samples per epoch; validate on 50 batches each epoch.
history_q5 = model_q5.fit_generator(
    aug_train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=val_generator,
    validation_steps=50)

# Persist the trained model and its training history (index 5).
model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_5')
save_history(history_q5, 5)
Found 1675 images belonging to 4 classes.
Found 835 images belonging to 4 classes.
Found 200 images belonging to 4 classes.
Epoch 1/30
100/100 [==============================] - 83s 831ms/step - loss: 0.2918 - acc: 0.8762 - val_loss: 0.0633 - val_acc: 0.9726
Epoch 2/30
100/100 [==============================] - 70s 699ms/step - loss: 0.1180 - acc: 0.9592 - val_loss: 0.4167 - val_acc: 0.9436
Epoch 3/30
100/100 [==============================] - 73s 729ms/step - loss: 0.0951 - acc: 0.9690 - val_loss: 0.0538 - val_acc: 0.9540
Epoch 4/30
100/100 [==============================] - 69s 690ms/step - loss: 0.0723 - acc: 0.9755 - val_loss: 0.0708 - val_acc: 0.9818
Epoch 5/30
100/100 [==============================] - 70s 704ms/step - loss: 0.0763 - acc: 0.9709 - val_loss: 8.4715e-04 - val_acc: 0.9754
Epoch 6/30
100/100 [==============================] - 67s 675ms/step - loss: 0.0572 - acc: 0.9788 - val_loss: 0.4300 - val_acc: 0.9721
Epoch 7/30
100/100 [==============================] - 69s 686ms/step - loss: 0.0472 - acc: 0.9848 - val_loss: 0.1024 - val_acc: 0.9841
Epoch 8/30
100/100 [==============================] - 70s 704ms/step - loss: 0.0463 - acc: 0.9851 - val_loss: 0.0878 - val_acc: 0.9786
Epoch 9/30
100/100 [==============================] - 64s 642ms/step - loss: 0.0551 - acc: 0.9810 - val_loss: 0.3738 - val_acc: 0.9572
Epoch 10/30
100/100 [==============================] - 71s 711ms/step - loss: 0.0467 - acc: 0.9854 - val_loss: 0.0119 - val_acc: 0.9669
Epoch 11/30
100/100 [==============================] - 69s 689ms/step - loss: 0.0351 - acc: 0.9896 - val_loss: 0.3914 - val_acc: 0.9501
Epoch 12/30
100/100 [==============================] - 68s 676ms/step - loss: 0.0346 - acc: 0.9883 - val_loss: 0.0567 - val_acc: 0.9637
Epoch 13/30
100/100 [==============================] - 68s 676ms/step - loss: 0.0270 - acc: 0.9896 - val_loss: 0.0047 - val_acc: 0.9695
Epoch 14/30
100/100 [==============================] - 67s 667ms/step - loss: 0.0353 - acc: 0.9896 - val_loss: 0.1151 - val_acc: 0.9694
Epoch 15/30
100/100 [==============================] - 67s 673ms/step - loss: 0.0394 - acc: 0.9877 - val_loss: 0.4567 - val_acc: 0.9663
Epoch 16/30
100/100 [==============================] - 66s 660ms/step - loss: 0.0272 - acc: 0.9899 - val_loss: 0.0183 - val_acc: 0.9818
Epoch 17/30
100/100 [==============================] - 66s 659ms/step - loss: 0.0296 - acc: 0.9886 - val_loss: 0.0140 - val_acc: 0.9780
Epoch 18/30
100/100 [==============================] - 65s 654ms/step - loss: 0.0197 - acc: 0.9927 - val_loss: 0.0055 - val_acc: 0.9831
Epoch 19/30
100/100 [==============================] - 71s 715ms/step - loss: 0.0252 - acc: 0.9924 - val_loss: 0.0012 - val_acc: 0.9870
Epoch 20/30
100/100 [==============================] - 66s 658ms/step - loss: 0.0196 - acc: 0.9934 - val_loss: 0.0489 - val_acc: 0.9786
Epoch 21/30
100/100 [==============================] - 67s 674ms/step - loss: 0.0221 - acc: 0.9924 - val_loss: 0.0033 - val_acc: 0.9854
Epoch 22/30
100/100 [==============================] - 68s 684ms/step - loss: 0.0309 - acc: 0.9889 - val_loss: 0.4488 - val_acc: 0.9805
Epoch 23/30
100/100 [==============================] - 67s 669ms/step - loss: 0.0156 - acc: 0.9940 - val_loss: 0.0017 - val_acc: 0.9754
Epoch 24/30
100/100 [==============================] - 66s 660ms/step - loss: 0.0173 - acc: 0.9939 - val_loss: 0.1932 - val_acc: 0.9792
Epoch 25/30
100/100 [==============================] - 67s 673ms/step - loss: 0.0247 - acc: 0.9899 - val_loss: 3.7317e-04 - val_acc: 0.9896
Epoch 26/30
100/100 [==============================] - 68s 679ms/step - loss: 0.0141 - acc: 0.9953 - val_loss: 0.0048 - val_acc: 0.9728
Epoch 27/30
100/100 [==============================] - 64s 635ms/step - loss: 0.0143 - acc: 0.9953 - val_loss: 7.9473e-08 - val_acc: 0.9851
Epoch 28/30
100/100 [==============================] - 71s 715ms/step - loss: 0.0180 - acc: 0.9937 - val_loss: 7.2592e-04 - val_acc: 0.9860
Epoch 29/30
100/100 [==============================] - 68s 676ms/step - loss: 0.0272 - acc: 0.9946 - val_loss: 0.3597 - val_acc: 0.9734
Epoch 30/30
100/100 [==============================] - 67s 671ms/step - loss: 0.0188 - acc: 0.9934 - val_loss: 0.0057 - val_acc: 0.9805
In [95]:
# Print the layer-by-layer architecture and parameter counts of model 5.
model_q5.summary()
Model: "sequential_4"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
vgg16 (Model)                (None, 7, 7, 512)         14714688  
_________________________________________________________________
flatten_4 (Flatten)          (None, 25088)             0         
_________________________________________________________________
dense_7 (Dense)              (None, 512)               12845568  
_________________________________________________________________
dropout_4 (Dropout)          (None, 512)               0         
_________________________________________________________________
batch_normalization_1 (Batch (None, 512)               2048      
_________________________________________________________________
dense_8 (Dense)              (None, 4)                 2052      
=================================================================
Total params: 27,564,356
Trainable params: 19,928,068
Non-trainable params: 7,636,288
_________________________________________________________________
In [96]:
# Evaluate model 5 on the test set; returns [test loss, test accuracy].
model_q5.evaluate_generator(test_generator)
Out[96]:
[1.1920928244535389e-07, 0.9950000047683716]
In [97]:
# Plot model 5's training/validation accuracy and loss curves.
plot_history(history_q5)
In [144]:
# Show the test images the saved model 5 misclassifies.
# (plot_error / load_model are presumably defined in an earlier cell — not visible here.)
plot_error(load_model('q5_model_5'), test_generator)

Model 6: add a batch normalization layer, without dropout

In [142]:
# Same generator setup as model 5: augmented training data,
# rescale-only validation/test data.
aug_train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical')
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1./255)
# batch_size=1, shuffle=False: keep test order stable for error inspection.
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=1,
    class_mode='categorical',
    shuffle=False)

def best_model():
    """Build and compile the model-6 classifier: fine-tuned VGG16 base
    plus a dense head with batch normalization (no dropout in this
    variant). Returns a compiled Sequential model."""
    backbone = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Unfreeze only the layers from 'block5_conv1' onward; everything
    # earlier stays frozen at its ImageNet weights.
    unfrozen = False
    for vgg_layer in backbone.layers:
        unfrozen = unfrozen or (vgg_layer.name == 'block5_conv1')
        vgg_layer.trainable = unfrozen

    classifier_stack = [
        backbone,
        layers.Flatten(),
        layers.Dense(512, activation='relu'),
        layers.BatchNormalization(),
        layers.Dense(4, activation='softmax'),
    ]
    model = models.Sequential()
    for piece in classifier_stack:
        model.add(piece)

    model.compile(
        loss='categorical_crossentropy',
        optimizer=optimizers.RMSprop(learning_rate=1e-4),
        metrics=['acc'],
    )
    return model

model_q5 = best_model()
# Train model 6 for 30 epochs with the same schedule as model 5.
history_q5 = model_q5.fit_generator(
    aug_train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=val_generator,
    validation_steps=50)

# Persist the trained model and its training history (index 6).
model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_6')
save_history(history_q5, 6)
Found 1675 images belonging to 4 classes.
Found 835 images belonging to 4 classes.
Found 200 images belonging to 4 classes.
In [109]:
# Print the layer-by-layer architecture and parameter counts of model 6.
model_q5.summary()
Model: "sequential_6"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
vgg16 (Model)                (None, 7, 7, 512)         14714688  
_________________________________________________________________
flatten_6 (Flatten)          (None, 25088)             0         
_________________________________________________________________
dense_11 (Dense)             (None, 512)               12845568  
_________________________________________________________________
batch_normalization_3 (Batch (None, 512)               2048      
_________________________________________________________________
dense_12 (Dense)             (None, 4)                 2052      
=================================================================
Total params: 27,564,356
Trainable params: 19,928,068
Non-trainable params: 7,636,288
_________________________________________________________________
In [110]:
# Evaluate model 6 on the test set; returns [test loss, test accuracy].
model_q5.evaluate_generator(test_generator)
Out[110]:
[0.0, 0.9750000238418579]
In [111]:
# Plot model 6's training/validation accuracy and loss curves.
plot_history(history_q5)
In [143]:
# Reload the saved model 6 and show its misclassified test images.
# (plot_error / load_model are presumably defined in an earlier cell — not visible here.)
model = load_model('q5_model_6')
plot_error(model, test_generator)

Model 7: model 5 + vertical flip + 20 epochs

In [145]:
# Model 7 generators: same as model 5 but with vertical_flip=True
# added to the training augmentation.
aug_train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,
    fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical')
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1./255)
# batch_size=1, shuffle=False: keep test order stable for error inspection.
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=1,
    class_mode='categorical',
    shuffle=False)

def best_model():
    """Build and compile the model-7 classifier — architecturally
    identical to model 5: fine-tuned VGG16 base, 512-unit dense head
    with dropout and batch normalization, 4-way softmax."""
    base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Layers stay frozen until 'block5_conv1' is reached; from there on
    # they participate in fine-tuning.
    frozen = True
    for base_layer in base.layers:
        if base_layer.name == 'block5_conv1':
            frozen = False
        base_layer.trainable = not frozen

    model = models.Sequential([
        base,
        layers.Flatten(),
        layers.Dense(512, activation='relu'),
        layers.Dropout(0.5),
        layers.BatchNormalization(),
        layers.Dense(4, activation='softmax'),
    ])

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])
    return model

model_q5 = best_model()
# Train model 7 for 20 epochs (shorter schedule than models 5/6).
history_q5 = model_q5.fit_generator(
    aug_train_generator,
    steps_per_epoch=100,
    epochs=20,
    validation_data=val_generator,
    validation_steps=50)

# Persist the trained model and its training history (index 7).
model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_7')
save_history(history_q5, 7)
Found 1675 images belonging to 4 classes.
Found 835 images belonging to 4 classes.
Found 200 images belonging to 4 classes.
Epoch 1/20
100/100 [==============================] - 77s 769ms/step - loss: 0.3517 - acc: 0.8515 - val_loss: 0.0409 - val_acc: 0.9497
Epoch 2/20
100/100 [==============================] - 73s 727ms/step - loss: 0.1975 - acc: 0.9269 - val_loss: 0.3927 - val_acc: 0.9332
Epoch 3/20
100/100 [==============================] - 70s 701ms/step - loss: 0.1410 - acc: 0.9440 - val_loss: 0.0445 - val_acc: 0.9559
Epoch 4/20
100/100 [==============================] - 71s 712ms/step - loss: 0.1252 - acc: 0.9509 - val_loss: 1.0473 - val_acc: 0.8106
Epoch 5/20
100/100 [==============================] - 73s 728ms/step - loss: 0.1071 - acc: 0.9591 - val_loss: 0.0011 - val_acc: 0.9728
Epoch 6/20
100/100 [==============================] - 72s 721ms/step - loss: 0.1007 - acc: 0.9633 - val_loss: 0.2011 - val_acc: 0.9734
Epoch 7/20
100/100 [==============================] - 70s 701ms/step - loss: 0.0969 - acc: 0.9614 - val_loss: 0.1621 - val_acc: 0.9408
Epoch 8/20
100/100 [==============================] - 71s 715ms/step - loss: 0.0717 - acc: 0.9764 - val_loss: 0.0269 - val_acc: 0.9695
Epoch 9/20
100/100 [==============================] - 75s 750ms/step - loss: 0.0743 - acc: 0.9707 - val_loss: 0.2888 - val_acc: 0.9650
Epoch 10/20
100/100 [==============================] - 73s 733ms/step - loss: 0.0749 - acc: 0.9718 - val_loss: 0.2983 - val_acc: 0.9715
Epoch 11/20
100/100 [==============================] - 74s 735ms/step - loss: 0.0741 - acc: 0.9704 - val_loss: 0.1104 - val_acc: 0.9741
Epoch 12/20
100/100 [==============================] - 70s 702ms/step - loss: 0.0662 - acc: 0.9742 - val_loss: 0.0745 - val_acc: 0.9682
Epoch 13/20
100/100 [==============================] - 75s 750ms/step - loss: 0.0698 - acc: 0.9721 - val_loss: 0.0571 - val_acc: 0.9423
Epoch 14/20
100/100 [==============================] - 72s 719ms/step - loss: 0.0623 - acc: 0.9770 - val_loss: 0.0415 - val_acc: 0.9109
Epoch 15/20
100/100 [==============================] - 73s 727ms/step - loss: 0.0504 - acc: 0.9801 - val_loss: 0.0260 - val_acc: 0.9591
Epoch 16/20
100/100 [==============================] - 74s 741ms/step - loss: 0.0489 - acc: 0.9823 - val_loss: 0.0060 - val_acc: 0.9728
Epoch 17/20
100/100 [==============================] - 67s 675ms/step - loss: 0.0367 - acc: 0.9861 - val_loss: 0.5374 - val_acc: 0.9591
Epoch 18/20
100/100 [==============================] - 72s 719ms/step - loss: 0.0497 - acc: 0.9842 - val_loss: 0.1830 - val_acc: 0.9741
Epoch 19/20
100/100 [==============================] - 78s 782ms/step - loss: 0.0469 - acc: 0.9835 - val_loss: 1.8379e-04 - val_acc: 0.9831
Epoch 20/20
100/100 [==============================] - 72s 725ms/step - loss: 0.0418 - acc: 0.9880 - val_loss: 0.0155 - val_acc: 0.9663
In [150]:
# Print the layer-by-layer architecture and parameter counts of model 7.
model_q5.summary()
Model: "sequential_11"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
vgg16 (Model)                (None, 7, 7, 512)         14714688  
_________________________________________________________________
flatten_11 (Flatten)         (None, 25088)             0         
_________________________________________________________________
dense_21 (Dense)             (None, 512)               12845568  
_________________________________________________________________
dropout_5 (Dropout)          (None, 512)               0         
_________________________________________________________________
batch_normalization_7 (Batch (None, 512)               2048      
_________________________________________________________________
dense_22 (Dense)             (None, 4)                 2052      
=================================================================
Total params: 27,564,356
Trainable params: 19,928,068
Non-trainable params: 7,636,288
_________________________________________________________________
In [151]:
# Evaluate model 7 on the test set; returns [test loss, test accuracy].
model_q5.evaluate_generator(test_generator)
Out[151]:
[0.0, 0.9800000190734863]
In [152]:
# Plot model 7's training/validation accuracy and loss curves.
plot_history(history_q5)
In [153]:
# model = load_model('q5_model_3')
# Show the misclassified test images for the in-memory model 7.
# (plot_error is presumably defined in an earlier cell — not visible here.)
plot_error(model_q5, test_generator)

Model 8: model 5 + 20 epochs + 256×256 input (the original label also said "vertical flip", but the cell below does not actually set vertical_flip)

In [165]:
# Model 8 generators: 256x256 inputs instead of 224x224.
# NOTE(review): the section title mentions vertical flip, but this
# datagen does NOT set vertical_flip — confirm which was intended.
aug_train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
aug_train_generator = aug_train_datagen.flow_from_directory(
    train_dir,
    target_size=(256, 256),
    batch_size=32,
    class_mode='categorical')
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(256, 256),
    batch_size=32,
    class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1./255)
# batch_size=1, shuffle=False: keep test order stable for error inspection.
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(256, 256),
    batch_size=1,
    class_mode='categorical',
    shuffle=False)

def best_model(input_shape=(256, 256, 3), fine_tune_from='block5_conv1'):
    """Build and compile the model-8 classifier.

    Same architecture as model 5 (fine-tuned VGG16 base, 512-unit dense
    head with dropout and batch normalization, 4-way softmax) but sized
    for 256x256 inputs by default, matching this cell's generators.

    Parameters
    ----------
    input_shape : tuple, optional
        Input image shape; default (256, 256, 3) preserves the original
        hard-coded behavior.
    fine_tune_from : str, optional
        Name of the first VGG16 layer to unfreeze for fine-tuning.

    Returns
    -------
    A compiled Keras ``Sequential`` model (categorical crossentropy,
    RMSprop at 1e-4, accuracy metric).
    """
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
    # Freeze every layer before `fine_tune_from`; unfreeze the rest.
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == fine_tune_from:
            set_trainable = True
        layer.trainable = set_trainable

    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(4, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])
    return model

model_q5 = best_model()
# Train model 8 for 20 epochs on the 256x256 generators.
history_q5 = model_q5.fit_generator(
    aug_train_generator,
    steps_per_epoch=100,
    epochs=20,
    validation_data=val_generator,
    validation_steps=50)

# Persist the trained model and its training history (index 8).
model_q5.save('/userhome/34/ljiang/deep_learning/A2/ass2/q5_model_8')
save_history(history_q5, 8)
Found 1675 images belonging to 4 classes.
Found 835 images belonging to 4 classes.
Found 200 images belonging to 4 classes.
Epoch 1/20
100/100 [==============================] - 91s 908ms/step - loss: 0.2859 - acc: 0.8854 - val_loss: 0.6128 - val_acc: 0.9300
Epoch 2/20
100/100 [==============================] - 86s 864ms/step - loss: 0.1152 - acc: 0.9557 - val_loss: 0.0513 - val_acc: 0.9760
Epoch 3/20
100/100 [==============================] - 87s 873ms/step - loss: 0.0931 - acc: 0.9649 - val_loss: 0.0411 - val_acc: 0.9708
Epoch 4/20
100/100 [==============================] - 84s 843ms/step - loss: 0.0771 - acc: 0.9706 - val_loss: 0.0020 - val_acc: 0.9682
Epoch 5/20
100/100 [==============================] - 91s 908ms/step - loss: 0.0706 - acc: 0.9744 - val_loss: 0.0852 - val_acc: 0.9630
Epoch 6/20
100/100 [==============================] - 81s 808ms/step - loss: 0.0570 - acc: 0.9805 - val_loss: 2.7515e-04 - val_acc: 0.9818
Epoch 7/20
100/100 [==============================] - 89s 890ms/step - loss: 0.0485 - acc: 0.9851 - val_loss: 0.1983 - val_acc: 0.9376
Epoch 8/20
100/100 [==============================] - 92s 922ms/step - loss: 0.0461 - acc: 0.9845 - val_loss: 0.1514 - val_acc: 0.9442
Epoch 9/20
100/100 [==============================] - 87s 868ms/step - loss: 0.0485 - acc: 0.9794 - val_loss: 0.1299 - val_acc: 0.9637
Epoch 10/20
100/100 [==============================] - 95s 953ms/step - loss: 0.0552 - acc: 0.9807 - val_loss: 0.0015 - val_acc: 0.9922
Epoch 11/20
100/100 [==============================] - 89s 894ms/step - loss: 0.0350 - acc: 0.9861 - val_loss: 0.0125 - val_acc: 0.9890
Epoch 12/20
100/100 [==============================] - 90s 905ms/step - loss: 0.0318 - acc: 0.9874 - val_loss: 0.1335 - val_acc: 0.9831
Epoch 13/20
100/100 [==============================] - 89s 887ms/step - loss: 0.0287 - acc: 0.9911 - val_loss: 2.0671 - val_acc: 0.8320
Epoch 14/20
100/100 [==============================] - 87s 869ms/step - loss: 0.0389 - acc: 0.9851 - val_loss: 0.0224 - val_acc: 0.9631
Epoch 15/20
100/100 [==============================] - 94s 941ms/step - loss: 0.0257 - acc: 0.9911 - val_loss: 0.1761 - val_acc: 0.9747
Epoch 16/20
100/100 [==============================] - 85s 852ms/step - loss: 0.0254 - acc: 0.9899 - val_loss: 0.0290 - val_acc: 0.9689
Epoch 17/20
100/100 [==============================] - 92s 916ms/step - loss: 0.0269 - acc: 0.9908 - val_loss: 0.0013 - val_acc: 0.9734
Epoch 18/20
100/100 [==============================] - 84s 840ms/step - loss: 0.0272 - acc: 0.9889 - val_loss: 0.0235 - val_acc: 0.9870
Epoch 19/20
100/100 [==============================] - 91s 907ms/step - loss: 0.0171 - acc: 0.9953 - val_loss: 0.1609 - val_acc: 0.9831
Epoch 20/20
100/100 [==============================] - 86s 864ms/step - loss: 0.0257 - acc: 0.9896 - val_loss: 0.0061 - val_acc: 0.9617
In [166]:
# Print the layer-by-layer architecture and parameter counts of model 8.
model_q5.summary()
Model: "sequential_15"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
vgg16 (Model)                (None, 8, 8, 512)         14714688  
_________________________________________________________________
flatten_15 (Flatten)         (None, 32768)             0         
_________________________________________________________________
dense_29 (Dense)             (None, 512)               16777728  
_________________________________________________________________
dropout_9 (Dropout)          (None, 512)               0         
_________________________________________________________________
batch_normalization_11 (Batc (None, 512)               2048      
_________________________________________________________________
dense_30 (Dense)             (None, 4)                 2052      
=================================================================
Total params: 31,496,516
Trainable params: 23,860,228
Non-trainable params: 7,636,288
_________________________________________________________________
In [167]:
# Evaluate model 8 on the test set; returns [test loss, test accuracy].
model_q5.evaluate_generator(test_generator)
Out[167]:
[0.0002489972102921456, 0.9549999833106995]
In [168]:
# Plot model 8's training/validation accuracy and loss curves.
plot_history(history_q5)
In [169]:
# model = load_model('q5_model_3')
# Show the misclassified test images for the in-memory model 8.
# (plot_error is presumably defined in an earlier cell — not visible here.)
plot_error(model_q5, test_generator)